-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathengine_begin.py
53 lines (44 loc) · 1.4 KB
/
engine_begin.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
"""
Sample script to bulk load data into SQL Server
Please read the comments and update as needed for your test case
"""
import pandas as pd
from sqlalchemy.engine import create_engine, URL
import time
# Change to your source data file path
source_file = 'C:\\Test\\Real_Estate_Sales_2001-2020_GL.csv'
# Change username, password, host, and database to the values required to connect to your database
# NOTE(review): credentials are hard-coded sample placeholders — replace with
# environment variables or a config file before using outside a local test.
connect_url = URL.create(
    'mssql+pyodbc',            # dialect+driver: SQL Server via pyodbc
    username="test",
    password="1234",
    host=".",                  # "." = local default SQL Server instance
    database="DA_Dev",
    query=dict(driver='ODBC Driver 17 for SQL Server'))
# fast_executemany=True makes pyodbc batch INSERTs, which is what makes the
# bulk load below fast.
engine = create_engine(
    url=connect_url,
    fast_executemany=True,
)
# Smoke-test the SQL Server connection before doing any work.
# Failures are reported but do not stop the script (best-effort check).
try:
    with engine.connect():
        print("Connection successful!")
except Exception as exc:
    print(f"Error occurred: {exc}")
def execute_statement(sql_statement):
    """Run a raw SQL statement in its own transaction.

    Uses engine.begin() so the statement is committed on success and
    rolled back on error. Errors are printed, not re-raised (best-effort).
    """
    try:
        with engine.begin() as conn:
            conn.exec_driver_sql(sql_statement)
    except Exception as exc:
        print(f"Error occurred: {exc}")
# Truncate the target table so the load starts from an empty table while
# keeping the table's existing schema, indexes, and permissions.
sql_statement = 'TRUNCATE TABLE dbo.CT_Real_Estate;'
execute_statement(sql_statement)
# Load the CSV into a DataFrame. low_memory=False reads the whole file in
# one pass so pandas infers consistent column dtypes.
df = pd.read_csv(source_file, low_memory=False)
# Time the bulk insert. perf_counter is the recommended monotonic clock
# for measuring elapsed time.
start = time.perf_counter()
# if_exists="append": the table was just truncated, so append into it.
# ("replace" would DROP and recreate the table from inferred dtypes,
# discarding the pre-defined schema and making the TRUNCATE pointless.)
df.to_sql(con=engine, schema="dbo", name="CT_Real_Estate",
          if_exists="append", index=False, chunksize=1000)
end = time.perf_counter()
print(end - start)